% gusucode.com > 支持向量机工具箱 - LIBSVM OSU_SVM LS_SVM源码程序 > 支持向量机工具箱 - LIBSVM OSU_SVM LS_SVM\stprtool\linear\anderson\eanders.m

    function [alpha,theta,solution,t,alpha1,alpha2]=...
   eanders(MI,SIGMA,J,tmax,epsilon,t,alpha1,alpha2)
% EANDERS epsilon-solution of the Generalized Anderson's task.
% [alpha,theta,solution,t,alpha1,alpha2]=...
%    eanders(MI,SIGMA,J,tmax,epsilon,t,alpha1,alpha2)
%
% EANDERS finds Epsilon-solution of the Generalized Anderson`s Task (GAT).
%   The GAT solves problem of finding separating hyperplane for two
%   classes under condition that found solution ensures minimal probability
%   of bad classification. The classes are described by conditional
%   probability p(x|k) (x - observation, k - class) with normal p.d.f.
%   whose parameters are not known, only set of possible parameters is
%   known. For more details cf. book SH10.
%
%   Epsilon-solution of the GAT ensures that the found solution has
%   the probability of bad classification less than given limit epsilon.
%   This algorithm works iteratively until the solution is found or
%   upper limit (arg tmax) of steps is trespassed.
%
% Input:
% EANDERS(MI,SIGMA,J,tmax,epsilon)
%   MI [NxK] contains column vectors of mean values MI=[Mi_1,Mi_2...Mi_K],
%      where Mi_i is i-th column vector. Number K means number of normal
%      distribution parameters and positive integer N is feature 
%      space dimension.
%   SIGMA [N,(K*N)] contains covariance matrices
%      SIGMA=[Sigma_1,Sigma_2,...Sigma_K] where Sigma_i is i-th covariance
%      matrix N-by-N.
%   J [1xK] is vector of class labels J=[label_1,label_2,..,label_K] which
%      contains integers 1 for the first class and 2 for the second class.
%      Value J(i) determines to which class the i-th pair of parameters
%      M_i and Sigma_i belongs.
%   tmax [1x1] is positive integer which determines upper limit of algorithm
%      steps. If tmax=inf (infinity) then the algorithm will iterate
%      until finds a solution.
%   epsilon [1x1] is desired upper bound of probability of bad classification.
%
% EANDERS(MI,SIGMA,J,tmax,epsilon,t,alpha1,alpha2) begins from state given by
%   t [1x1] begin step number.
%   alpha1 [Nx1], alpha2[Nx1], theta [1x1] are state variables of the 
%   algorithm.
%
% Returns:
%   alpha [Nx1], theta [1x1] are parameters of the found solution (decision
%      hyperplane alpha'x=theta) in step t.
%   solution [1x1] contains number 0 if solution is not found or 1 if is
%      found considering given r.
%   t [1x1] is step number in which the algorithm exited.
%
%   alpha1 [Nx1], alpha2 [Nx1] are state variables.
%
% For more details refer to book SH10.
%
% See also OANDERS, GGANDERS, GANDERS, GANDERS2.
%

% Statistical Pattern Recognition Toolbox, Vojtech Franc, Vaclav Hlavac
% (c) Czech Technical University Prague, http://cmp.felk.cvut.cz
% Written Vojtech Franc (diploma thesis) 24.10.1999
% Modifications
% 24. 6.00 V. Hlavac, comments polished.

% Validate arguments before any input is touched; otherwise an undefined
% variable error would fire before this intended message could be raised.
if nargin < 5,
  error('Not enough input arguments.');
end

if nargin < 6,
   t=0;
end

% computes radius of ellipsoids coresponding to upper bound of prob. 
% of bad classification
r = -icdf('norm',epsilon,0,1);

% get dimension and number of distributions
N=size(MI,1);
K=size(MI,2);

if t==0,
   % STEP (2): initialize alpha1/alpha2 with the first mean vector found
   % for class 1 and class 2, respectively.
   a1set=0;
   a2set=0;
   i=0;
   while i < K & (a1set==0 | a2set==0),
      i=i+1;
      if J(i)==1 & a1set==0,
         alpha1=MI(:,i);
         a1set=1;
      elseif J(i)==2 & a2set==0,
         alpha2=MI(:,i);
         a2set=1;
      end
   end

   tmax=tmax-1;
   t=t+1;
end

solution=0;
while solution==0 & tmax > 0,
   tmax=tmax-1;

   % compute alpha and theta of the current candidate hyperplane
   alpha=alpha1-alpha2;
   theta=0.5*(alpha1'*alpha1 - alpha2'*alpha2);

   % STEP (3)
   % find ellipsoid N(mi(i),sigma(i)) which is not conform to given r (~epsilon)

   % on start we suppose that the solution is found
   solution = 1;
   for i=1:K,
      mi = MI(:,i);
      sigma = SIGMA(:,(i-1)*N+1:i*N);

      % denominator: Mahalanobis-type scale of alpha w.r.t. sigma
      aSa=sqrt( alpha'*sigma*alpha );

      if aSa ~= 0,
         if J(i)==1,
            if r >= ( alpha'*mi - theta )/aSa,
               % contact point of the violating ellipsoid; move alpha1
               % towards it by the optimal step k (clipped to 1)
               x = mi - ( r/aSa )*sigma*alpha;
               k = min([((alpha1-alpha2)'*(alpha1-x))/( (alpha1-x)'*(alpha1-x) ), 1]);
               alpha1 = alpha1*(1-k) + x*k;
               solution = 0;
               t=t+1;
               break;
            end
         elseif J(i)==2,
            % symmetric test for class 2 (reuses aSa computed above)
            if r >= ( theta - alpha'*mi )/aSa,
               x = mi + ( r/aSa )*sigma*alpha;
               k = min([((alpha2-alpha1)'*(alpha2-x))/( (alpha2-x)'*(alpha2-x)), 1]);
               alpha2 = alpha2*(1-k) + x*k;
               solution = 0;
               t=t+1;
               break;
            end
         end % if J(i)==1,...elseif J(i)==2,

      else % if aSa ~= 0,
         % solution does not exist
         solution = 0;
         tmax=0;
      end

   end % for i=1:K


end % while

% compute alpha and theta of the final hyperplane
alpha=alpha1-alpha2;
theta=0.5*(alpha1'*alpha1 - alpha2'*alpha2);